ret = 0;
hypercall_page = map_domain_page(mfn);
- hypercall_page_initialise(hypercall_page);
+ hypercall_page_initialise(d, hypercall_page);
unmap_domain_page(hypercall_page);
put_page_and_type(mfn_to_page(mfn));
#if defined(__i386__)
regs->eax = op;
- if ( supervisor_mode_kernel )
+ if ( supervisor_mode_kernel || hvm_guest(current) )
regs->eip &= ~31; /* re-execute entire hypercall entry stub */
else
regs->eip -= 2; /* re-execute 'int 0x82' */
return -1;
}
- hypercall_page_initialise((void *)hypercall_page);
+ hypercall_page_initialise(d, (void *)hypercall_page);
}
/* Copy the initial ramdisk. */
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
+#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
pbuf[(*index)++] = c;
}
+#if defined(__i386__)
+
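+/*
+ * On i386, HVM guests' hypercalls are dispatched directly to the existing
+ * handlers via the table below; hypercall numbers without an entry remain
+ * NULL and are rejected with -ENOSYS.
+ */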
+typedef unsigned long hvm_hypercall_t(
+ unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#define HYPERCALL(x) [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
+static hvm_hypercall_t *hvm_hypercall_table[] = {
+ HYPERCALL(mmu_update),
+ HYPERCALL(memory_op),
+ HYPERCALL(multicall),
+ HYPERCALL(update_va_mapping),
+ HYPERCALL(event_channel_op_compat),
+ HYPERCALL(xen_version),
+ HYPERCALL(grant_table_op),
+ HYPERCALL(event_channel_op),
+ HYPERCALL(hvm_op)
+};
+#undef HYPERCALL
+
+void hvm_do_hypercall(struct cpu_user_regs *pregs)
+{
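+ /* Hypercalls may only be issued from guest kernel mode, never ring 3. */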
+ if ( ring_3(pregs) )
+ {
+ pregs->eax = -EPERM;
+ return;
+ }
+
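+ /* Refuse hypercall numbers outside the table or without a handler. */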
+ if ( pregs->eax >= ARRAY_SIZE(hvm_hypercall_table) ||
+ !hvm_hypercall_table[pregs->eax] )
+ {
+ DPRINTK("HVM vcpu %d:%d did a bad hypercall %u.\n",
+ current->domain->domain_id, current->vcpu_id,
+ pregs->eax);
+ pregs->eax = -ENOSYS;
+ }
+ else
+ {
+ pregs->eax = hvm_hypercall_table[pregs->eax](
+ pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
+ }
+}
+
+#else /* __x86_64__ */
+
+void hvm_do_hypercall(struct cpu_user_regs *pregs)
+{
+ printk("HVM hypercalls are not supported on x86_64 yet.\n");
+}
+
+#endif
+
+/* Initialise a hypercall transfer page for an HVM domain using
+ paravirtualised drivers. */
+void hvm_hypercall_page_initialise(struct domain *d,
+ void *hypercall_page)
+{
+ hvm_funcs.init_hypercall_page(d, hypercall_page);
+}
+
/*
* only called in HVM domain BSP context
* when booting, vcpuid is always equal to apic_id
ctxt->flags = VGCF_HVM_GUEST;
}
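+/* Fill the guest's hypercall transfer page with VMMCALL-based stubs. */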
+static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
+{
+ char *p;
+ int i;
+
+ memset(hypercall_page, 0, PAGE_SIZE);
+
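+ /* Each hypercall gets a 32-byte stub: mov $<nr>,%eax; vmmcall; ret. */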
+ for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+ {
+ p = (char *)(hypercall_page + (i * 32));
+ *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */
+ *(u32 *)(p + 1) = i;
+ *(u8 *)(p + 5) = 0x0f; /* vmmcall */
+ *(u8 *)(p + 6) = 0x01;
+ *(u8 *)(p + 7) = 0xd9;
+ *(u8 *)(p + 8) = 0xc3; /* ret */
+ }
+
+ /* Don't support HYPERVISOR_iret at the moment */
+ *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
+}
+
int start_svm(void)
{
u32 eax, ecx, edx;
hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
hvm_funcs.init_ap_context = svm_init_ap_context;
+ hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
+
hvm_enabled = 1;
return 1;
return result;
}
-static inline void svm_do_msr_access(struct vcpu *v, struct cpu_user_regs *regs)
+static inline void svm_do_msr_access(
+ struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
int inst_len;
u64 msr_content=0;
+ u32 eax, edx;
ASSERT(vmcb);
default:
if (long_mode_do_msr_read(regs))
goto done;
+
+ if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
+ {
+ regs->eax = eax;
+ regs->edx = edx;
+ goto done;
+ }
+
rdmsr_safe(regs->ecx, regs->eax, regs->edx);
break;
}
vlapic_msr_set(VLAPIC(v), msr_content);
break;
default:
- long_mode_do_msr_write(regs);
+ if ( !long_mode_do_msr_write(regs) )
+ wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
break;
}
}
return 1;
}
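+/* Fill the guest's hypercall transfer page with VMCALL-based stubs. */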
+static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
+{
+ char *p;
+ int i;
+
+ memset(hypercall_page, 0, PAGE_SIZE);
+
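+ /* Each hypercall gets a 32-byte stub: mov $<nr>,%eax; vmcall; ret. */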
+ for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+ {
+ p = (char *)(hypercall_page + (i * 32));
+ *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */
+ *(u32 *)(p + 1) = i;
+ *(u8 *)(p + 5) = 0x0f; /* vmcall */
+ *(u8 *)(p + 6) = 0x01;
+ *(u8 *)(p + 7) = 0xc1;
+ *(u8 *)(p + 8) = 0xc3; /* ret */
+ }
+
+ /* Don't support HYPERVISOR_iret at the moment */
+ *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
+}
+
int start_vmx(void)
{
u32 eax, edx;
hvm_funcs.init_ap_context = vmx_init_ap_context;
+ hvm_funcs.init_hypercall_page = vmx_init_hypercall_page;
+
hvm_enabled = 1;
return 1;
static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
{
u64 msr_content = 0;
+ u32 eax, edx;
struct vcpu *v = current;
HVM_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
msr_content = VLAPIC(v) ? VLAPIC(v)->apic_base_msr : 0;
break;
default:
- if(long_mode_do_msr_read(regs))
+ if ( long_mode_do_msr_read(regs) )
return;
+
+ if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
+ {
+ regs->eax = eax;
+ regs->edx = edx;
+ return;
+ }
+
rdmsr_safe(regs->ecx, regs->eax, regs->edx);
break;
}
vlapic_msr_set(VLAPIC(v), msr_content);
break;
default:
- long_mode_do_msr_write(regs);
+ if ( !long_mode_do_msr_write(regs) )
+ wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
break;
}
__update_guest_eip(inst_len);
break;
}
-#if 0 /* keep this for debugging */
case EXIT_REASON_VMCALL:
+ {
__get_instruction_length(inst_len);
__vmread(GUEST_RIP, &eip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- hvm_print_line(v, regs.eax); /* provides the current domain */
+ hvm_do_hypercall(&regs);
__update_guest_eip(inst_len);
break;
-#endif
+ }
case EXIT_REASON_CR_ACCESS:
{
__vmread(GUEST_RIP, &eip);
case EXIT_REASON_MWAIT_INSTRUCTION:
__hvm_bug(&regs);
break;
- case EXIT_REASON_VMCALL:
case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH:
case EXIT_REASON_VMPTRLD:
#include <xen/symbols.h>
#include <xen/iocap.h>
#include <xen/nmi.h>
+#include <xen/version.h>
#include <asm/shadow.h>
#include <asm/system.h>
#include <asm/io.h>
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
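+/* Reads of the hypervisor-defined MSR (0x40000000) simply return zero. */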
+int rdmsr_hypervisor_regs(
+ uint32_t idx, uint32_t *eax, uint32_t *edx)
+{
+ idx -= 0x40000000;
+ if ( idx > 0 )
+ return 0;
+
+ *eax = *edx = 0;
+ return 1;
+}
+
+int wrmsr_hypervisor_regs(
+ uint32_t idx, uint32_t eax, uint32_t edx)
+{
+ struct domain *d = current->domain;
+
+ idx -= 0x40000000;
+ if ( idx > 0 )
+ return 0;
+
+ switch ( idx )
+ {
+ case 0:
+ {
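+ /*
+ * The value written is the guest physical address of the hypercall
+ * transfer page: bits 63:12 give the guest frame and bits 11:0 the page
+ * index (only index 0 exists; see CPUID leaf 0x40000002).
+ */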
+ void *hypercall_page;
+ unsigned long mfn;
+ unsigned long gmfn = ((unsigned long)edx << 20) | (eax >> 12);
+ unsigned int idx = eax & 0xfff;
+
+ if ( idx > 0 )
+ {
+ DPRINTK("Dom%d: Out of range index %u to MSR %08x\n",
+ d->domain_id, idx, 0x40000000);
+ return 0;
+ }
+
+ mfn = gmfn_to_mfn(d, gmfn);
+
+ if ( !mfn_valid(mfn) ||
+ !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ {
+ DPRINTK("Dom%d: Bad GMFN %lx (MFN %lx) to MSR %08x\n",
+ d->domain_id, gmfn, mfn, 0x40000000);
+ return 0;
+ }
+
+ hypercall_page = map_domain_page(mfn);
+ hypercall_page_initialise(d, hypercall_page);
+ unmap_domain_page(hypercall_page);
+
+ put_page_and_type(mfn_to_page(mfn));
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return 1;
+}
+
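+/* Leaves 0x40000000-0x40000002: Xen signature, version and hypercall MSR. */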
int cpuid_hypervisor_leaves(
uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
- if ( (idx < 0x40000000) || (idx > 0x40000000) )
+ idx -= 0x40000000;
+ if ( idx > 2 )
return 0;
- switch ( idx - 0x40000000 )
+ switch ( idx )
{
case 0:
- *eax = 0x40000000;
- *ebx = 0x006e6558; /* "Xen\0" */
- *ecx = *edx = 0;
+ *eax = 0x40000002; /* Largest leaf */
+ *ebx = 0x566e6558; /* Signature 1: "XenV" */
+ *ecx = 0x65584d4d; /* Signature 2: "MMXe" */
+ *edx = 0x4d4d566e; /* Signature 3: "nVMM" */
+ break;
+
+ case 1:
+ *eax = (xen_major_version() << 16) | xen_minor_version();
+ *ebx = 0; /* Reserved */
+ *ecx = 0; /* Reserved */
+ *edx = 0; /* Reserved */
+ break;
+
+ case 2:
+ *eax = 1; /* Number of hypercall-transfer pages */
+ *ebx = 0x40000000; /* MSR base address */
+ *ecx = 0; /* Features 1 */
+ *edx = 0; /* Features 2 */
break;
default:
break;
#endif
default:
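+ /* Writes to hypervisor-reserved MSRs are handled by Xen itself. */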
+ if ( wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx) )
+ break;
+
if ( (rdmsr_safe(regs->ecx, l, h) != 0) ||
(regs->eax != l) || (regs->edx != h) )
DPRINTK("Domain attempted WRMSR %p from "
goto fail;
break;
default:
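+ /* Reads of hypervisor-reserved MSRs are satisfied by Xen itself. */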
+ if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )
+ {
+ regs->eax = l;
+ regs->edx = h;
+ break;
+ }
/* Everyone can read the MSR space. */
/*DPRINTK("Domain attempted RDMSR %p.\n", _p(regs->ecx));*/
if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
*(u16 *)(p+ 6) = 0x82cd; /* int $0x82 */
}
-void hypercall_page_initialise(void *hypercall_page)
+void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
- if ( supervisor_mode_kernel )
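+ /* HVM guests get VMCALL/VMMCALL stubs; PV guests keep the existing stubs. */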
+ if ( hvm_guest(d->vcpu[0]) )
+ hvm_hypercall_page_initialise(d, hypercall_page);
+ else if ( supervisor_mode_kernel )
hypercall_page_initialise_ring0_kernel(hypercall_page);
else
hypercall_page_initialise_ring1_kernel(hypercall_page);
return 0;
}
-void hypercall_page_initialise(void *hypercall_page)
+static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
{
char *p;
int i;
*(u16 *)(p+ 9) = 0x050f; /* syscall */
}
+void hypercall_page_initialise(struct domain *d, void *hypercall_page)
+{
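+ /* HVM guests get VMCALL/VMMCALL stubs; PV guests get SYSCALL-based stubs. */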
+ if ( hvm_guest(d->vcpu[0]) )
+ hvm_hypercall_page_initialise(d, hypercall_page);
+ else
+ hypercall_page_initialise_ring3_kernel(hypercall_page);
+}
+
/*
* Local variables:
* mode: C
* Initialise a hypercall-transfer page. The given pointer must be mapped
* in Xen virtual address space (accesses are not validated or checked).
*/
-extern void hypercall_page_initialise(void *);
+extern void hypercall_page_initialise(struct domain *d, void *);
struct arch_domain
{
void (*init_ap_context)(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector);
+
+ void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
};
extern struct hvm_function_table hvm_funcs;
return hvm_funcs.instruction_length(v);
}
+void hvm_hypercall_page_initialise(struct domain *d,
+ void *hypercall_page);
+
static inline unsigned long
hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
{
extern void hvm_print_line(struct vcpu *v, const char c);
extern void hlt_timer_fn(void *data);
+void hvm_do_hypercall(struct cpu_user_regs *pregs);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
int cpuid_hypervisor_leaves(
uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+int rdmsr_hypervisor_regs(
+ uint32_t idx, uint32_t *eax, uint32_t *edx);
+int wrmsr_hypervisor_regs(
+ uint32_t idx, uint32_t eax, uint32_t edx);
#endif /* !__ASSEMBLY__ */